In [1]:
import sys

sys.path.append('../../..')

In [2]:
import numpy
import scipy.stats

from lib.msra_loss import MSRALossFunctionAbs

In [3]:
class MCLossFunction(MSRALossFunctionAbs):
    """Monte Carlo estimator of the shortfall risk for the loss
    l(x) = sum_i x_i + 0.5 * sum_i (x_i^+)^2 + alpha * sum_{i<j} x_i^+ x_j^+."""

    def __init__(self, distrib, alpha, c=None):
        self.__alpha = alpha
        super().__init__(distrib, c)

    def shortfall_risk(self, m=None):
        m = self._check_argument(m)
        x_minus_m = numpy.subtract(self.x, m)

        # Linear term: E[sum_i (X_i - m_i)]
        mean_sum_ = numpy.mean(x_minus_m.sum(axis=1))

        # Quadratic term: E[sum_i ((X_i - m_i)^+)^2]
        pos_part = numpy.maximum(x_minus_m, 0.)
        pos_part_squared = numpy.square(pos_part)
        mean_sum_2_ = numpy.mean(pos_part_squared.sum(axis=1))

        # Cross terms: sum_{i<j} E[(X_i - m_i)^+ (X_j - m_j)^+]
        to_add = 0.
        if self.__alpha != 0.:
            for i in range(self.dim):
                for j in range(i + 1, self.dim):
                    to_add += numpy.mean(numpy.multiply(pos_part[:, i], pos_part[:, j]))

        return mean_sum_ + 0.5 * mean_sum_2_ + self.__alpha * to_add

    def shortfall_risk_jac(self, m):
        m = self._check_argument(m)
        x_minus_m = numpy.subtract(self.x, m)

        pos_part = numpy.maximum(x_minus_m, 0.)
        mean_pos_part = numpy.mean(pos_part, axis=0)

        if self.__alpha == 0.:
            return mean_pos_part + 1.

        # Cross-term contribution: alpha * sum_{j != i} E[1{X_i > m_i} (X_j - m_j)^+]
        dbl = []
        for i in range(self.dim):
            indic_i = numpy.sign(pos_part[:, i])  # indicator of {X_i - m_i > 0}
            tmp = 0.
            for j in range(self.dim):
                if i != j:
                    tmp += numpy.mean(numpy.multiply(indic_i, pos_part[:, j]))

            dbl.append(self.__alpha * tmp)

        return mean_pos_part + 1. + numpy.array(dbl)
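
Since sum_{i<j} p_i p_j = ((sum_i p_i)^2 - sum_i p_i^2) / 2 for each sample, the double loop over pairs in shortfall_risk can be vectorized. A minimal sketch, algebraically equivalent to the loop above (the helper name cross_terms is ours, not part of the library):

def cross_terms(pos_part):
    # sum_{i<j} E[p_i p_j] = 0.5 * E[(sum_i p_i)^2 - sum_i p_i^2]
    row_sum = pos_part.sum(axis=1)
    return 0.5 * numpy.mean(numpy.square(row_sum) - numpy.square(pos_part).sum(axis=1))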

In [4]:
M = 100000

mu = numpy.zeros(10)
sigma = numpy.array([[ 2.11,  0.37, -0.42, -0.2 , -1.73, -0.54,  0.42,  0.81, -0.2 , -0.94],
                     [ 0.37,  1.78, -0.45, -0.25, -1.73,  0.04,  0.35,  0.38, -0.91, -0.48],
                     [-0.42, -0.45,  0.87,  0.09,  0.4 , -0.05, -0.25,  0.06,  0.16,  0.45],
                     [-0.2 , -0.25,  0.09,  0.19,  0.38, -0.04, -0.11, -0.04,  0.22,  0.17],
                     [-1.73, -1.73,  0.4 ,  0.38,  5.3 ,  0.61,  0.46,  0.26,  1.74,  1.46],
                     [-0.54,  0.04, -0.05, -0.04,  0.61,  0.53,  0.1 , -0.3 , -0.1 ,  0.17],
                     [ 0.42,  0.35, -0.25, -0.11,  0.46,  0.1 ,  0.91,  0.51, -0.17, -0.25],
                     [ 0.81,  0.38,  0.06, -0.04,  0.26, -0.3 ,  0.51,  1.55,  0.49,  0.08],
                     [-0.2 , -0.91,  0.16,  0.22,  1.74, -0.1 , -0.17,  0.49,  1.33,  0.65],
                     [-0.94, -0.48,  0.45,  0.17,  1.46,  0.17, -0.25,  0.08,  0.65,  0.88]])


rv = scipy.stats.multivariate_normal(mean=mu, cov=sigma, allow_singular=True)
X = rv.rvs(size=M)

c = 1.

loss = MCLossFunction(X, 0., c)
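
A quick sanity check one might add here (not in the original run): the empirical moments of the simulated sample should be close to mu and sigma.

print(numpy.abs(X.mean(axis=0) - mu).max())
print(numpy.abs(numpy.cov(X, rowvar=False) - sigma).max())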

In [5]:
from scipy.optimize import minimize

maxiter = 3500

In [6]:
guess = numpy.zeros(loss.dim)

In [7]:
cons = ({'type': 'ineq',
         'fun' : lambda x: loss.ineq_constraint(x),
         'jac' : lambda x: loss.ineq_constraint_jac(x)})
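
Before handing the analytic Jacobian to SLSQP, it can be checked against a finite-difference approximation via scipy.optimize.approx_fprime. A sketch we add for verification (the test point m0 is arbitrary):

from scipy.optimize import approx_fprime

m0 = numpy.zeros(loss.dim)
fd_jac = approx_fprime(m0, loss.ineq_constraint, 1e-6)
print(numpy.abs(fd_jac - loss.ineq_constraint_jac(m0)).max())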

In [8]:
res = minimize(loss.objective, guess, 
               jac=loss.objective_jac, 
               constraints=cons, 
               method='SLSQP',
               options={'maxiter': maxiter, 'disp': True})


Optimization terminated successfully.    (Exit mode 0)
            Current function value: 1.73184510748
            Iterations: 9
            Function evaluations: 10
            Gradient evaluations: 9

In [9]:
print(res)
print()
print(loss.ineq_constraint(res.x))


     fun: 1.7318451074816463
     jac: array([ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  0.])
 message: 'Optimization terminated successfully.'
    nfev: 10
     nit: 9
    njev: 9
  status: 0
 success: True
       x: array([ 0.41862473,  0.29684273, -0.0588768 , -0.34309574,  1.33068063,
       -0.19498927, -0.03096563,  0.22686875,  0.13787517, -0.05111945])

-2.83558547309e-07
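
Two remarks on the solution, with quick checks one could append: the objective here appears to be the aggregate reserve (its Jacobian is a vector of ones, and res.fun equals res.x.sum()), and the constraint value of roughly -2.8e-07 shows the shortfall-risk constraint is binding at the optimum, up to solver tolerance.

print(numpy.isclose(res.fun, res.x.sum()))      # objective value = total amount sum(m_i)
print(abs(loss.ineq_constraint(res.x)) < 1e-5)  # constraint active at the optimum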